bitkeeper revision 1.1236.32.8 (4237887fr1Mo71Tp0RoJHmt875tSBg)
author mafetter@fleming.research <mafetter@fleming.research>
Wed, 16 Mar 2005 01:14:39 +0000 (01:14 +0000)
committer mafetter@fleming.research <mafetter@fleming.research>
Wed, 16 Mar 2005 01:14:39 +0000 (01:14 +0000)
Added extra shadow_sync_mfn() in do_update_va_mapping to deal
with a shortcoming of the checking code in _check_pagetables().
Better to have a few more flushes and checking code that can
still be used.  It would be even better to have smarter checking
code, but that will take more time.

Signed-off-by: michael.fetterman@cl.cam.ac.uk
xen/arch/x86/mm.c
xen/include/xen/perfc_defn.h

index 69c55a185f9cb3e0e921e9319cde18777198bf64..19b01667d466cb50d6ad3d90b90bfba55d160d2d 100644 (file)
@@ -2001,6 +2001,8 @@ int do_update_va_mapping(unsigned long va,
     }
     else
     {
+        unsigned long l1mfn;
+
         if ( unlikely(percpu_info[cpu].foreign &&
                       (shadow_mode_translate(d) ||
                        shadow_mode_translate(percpu_info[cpu].foreign))) )
@@ -2021,6 +2023,29 @@ int do_update_va_mapping(unsigned long va,
         //
         __shadow_sync_va(ed, va);
 
+#if 1 /* keep check_pagetables() happy */
+        /*
+         * However, the above doesn't guarantee that there's no snapshot of
+         * the L1 table in question; it just says that the relevant L2 and L1
+         * entries for VA are in-sync.  There might still be a snapshot.
+         *
+         * The checking code in _check_pagetables() assumes that no one will
+         * mutate the shadow of a page that has a snapshot.  It's actually
+         * OK to not sync this page, but it seems simpler to:
+         * 1) keep all code paths the same, and
+         * 2) maintain the invariant for _check_pagetables(), rather than try
+         *    to teach it about this boundary case.
+         * So we flush this L1 page, if it's out of sync.
+         */
+        l1mfn = (l2_pgentry_val(linear_l2_table(ed)[l2_table_offset(va)]) >>
+                 PAGE_SHIFT);
+        if ( mfn_out_of_sync(l1mfn) )
+        {
+            perfc_incrc(extra_va_update_sync);
+            __shadow_sync_mfn(d, l1mfn);
+        }
+#endif /* keep check_pagetables() happy */
+
         if ( unlikely(__put_user(val, &l1_pgentry_val(
                                      linear_pg_table[l1_linear_offset(va)]))) )
             err = -EINVAL;
index 4402020a6da9b224a3f2a190ddd06c2a45340443..e4b506171bceb2a860e5336abcfdc8adeaab95b4 100644 (file)
@@ -50,6 +50,7 @@ PERFCOUNTER_CPU(shadow_sync_all,                   "calls to shadow_sync_all")
 PERFCOUNTER_CPU(shadow_make_snapshot,              "snapshots created")
 PERFCOUNTER_CPU(shadow_mark_mfn_out_of_sync_calls, "calls to shadow_mk_out_of_sync")
 PERFCOUNTER_CPU(shadow_out_of_sync_calls,          "calls to shadow_out_of_sync")
+PERFCOUNTER_CPU(extra_va_update_sync,              "extra syncs for bug in chk_pgtb")
 PERFCOUNTER_CPU(snapshot_entry_matches_calls,      "calls to ss_entry_matches")
 PERFCOUNTER_CPU(snapshot_entry_matches_true,       "ss_entry_matches returns true")